--- /dev/null
+/******************************************************************************
+ * preempt.c
+ *
+ * Track atomic regions in the hypervisor which disallow sleeping.
+ *
+ * Copyright (c) 2010, Keir Fraser <keir@xen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/preempt.h>
+
+/*
+ * Per-CPU nesting count of atomic regions entered via preempt_disable()
+ * (see xen/preempt.h).  Non-zero means the CPU must not sleep.
+ */
+DEFINE_PER_CPU(unsigned int, __preempt_count);
#include <xen/time.h>
#include <xen/spinlock.h>
#include <xen/guest_access.h>
+#include <xen/preempt.h>
#include <public/sysctl.h>
#include <asm/processor.h>
atomic_dec(&spin_debug);
}
-static DEFINE_PER_CPU(atomic_t, lockdepth);
-
-#define lockdepth_inc() atomic_inc(&this_cpu(lockdepth))
-#define lockdepth_dec() atomic_dec(&this_cpu(lockdepth))
-
-unsigned int locking_depth(void)
-{
- return atomic_read(&this_cpu(lockdepth));
-}
-
#else /* defined(NDEBUG) */
#define check_lock(l) ((void)0)
-#define lockdepth_inc() ((void)0)
-#define lockdepth_dec() ((void)0)
-unsigned int locking_depth(void) { return 0; }
#endif
cpu_relax();
}
LOCK_PROFILE_GOT;
- lockdepth_inc();
+ preempt_disable();
}
void _spin_lock_irq(spinlock_t *lock)
local_irq_disable();
}
LOCK_PROFILE_GOT;
- lockdepth_inc();
+ preempt_disable();
}
unsigned long _spin_lock_irqsave(spinlock_t *lock)
local_irq_save(flags);
}
LOCK_PROFILE_GOT;
- lockdepth_inc();
+ preempt_disable();
return flags;
}
void _spin_unlock(spinlock_t *lock)
{
- lockdepth_dec();
+ preempt_enable();
LOCK_PROFILE_REL;
_raw_spin_unlock(&lock->raw);
}
void _spin_unlock_irq(spinlock_t *lock)
{
- lockdepth_dec();
+ preempt_enable();
LOCK_PROFILE_REL;
_raw_spin_unlock(&lock->raw);
local_irq_enable();
void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
- lockdepth_dec();
+ preempt_enable();
LOCK_PROFILE_REL;
_raw_spin_unlock(&lock->raw);
local_irq_restore(flags);
#ifdef LOCK_PROFILE
lock->profile.time_locked = NOW();
#endif
- lockdepth_inc();
+ preempt_disable();
return 1;
}
{
check_lock(&lock->debug);
_raw_read_lock(&lock->raw);
- lockdepth_inc();
+ preempt_disable();
}
void _read_lock_irq(rwlock_t *lock)
local_irq_disable();
check_lock(&lock->debug);
_raw_read_lock(&lock->raw);
- lockdepth_inc();
+ preempt_disable();
}
unsigned long _read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
check_lock(&lock->debug);
_raw_read_lock(&lock->raw);
- lockdepth_inc();
+ preempt_disable();
return flags;
}
void _read_unlock(rwlock_t *lock)
{
- lockdepth_dec();
+ preempt_enable();
_raw_read_unlock(&lock->raw);
}
void _read_unlock_irq(rwlock_t *lock)
{
- lockdepth_dec();
+ preempt_enable();
_raw_read_unlock(&lock->raw);
local_irq_enable();
}
void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- lockdepth_dec();
+ preempt_enable();
_raw_read_unlock(&lock->raw);
local_irq_restore(flags);
}
{
check_lock(&lock->debug);
_raw_write_lock(&lock->raw);
- lockdepth_inc();
+ preempt_disable();
}
void _write_lock_irq(rwlock_t *lock)
local_irq_disable();
check_lock(&lock->debug);
_raw_write_lock(&lock->raw);
- lockdepth_inc();
+ preempt_disable();
}
unsigned long _write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
check_lock(&lock->debug);
_raw_write_lock(&lock->raw);
- lockdepth_inc();
+ preempt_disable();
return flags;
}
check_lock(&lock->debug);
if ( !_raw_write_trylock(&lock->raw) )
return 0;
- lockdepth_inc();
+ preempt_disable();
return 1;
}
void _write_unlock(rwlock_t *lock)
{
- lockdepth_dec();
+ preempt_enable();
_raw_write_unlock(&lock->raw);
}
void _write_unlock_irq(rwlock_t *lock)
{
- lockdepth_dec();
+ preempt_enable();
_raw_write_unlock(&lock->raw);
local_irq_enable();
}
void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- lockdepth_dec();
+ preempt_enable();
_raw_write_unlock(&lock->raw);
local_irq_restore(flags);
}
--- /dev/null
+/******************************************************************************
+ * preempt.h
+ *
+ * Track atomic regions in the hypervisor which disallow sleeping.
+ *
+ * Copyright (c) 2010, Keir Fraser <keir@xen.org>
+ */
+
+#ifndef __XEN_PREEMPT_H__
+#define __XEN_PREEMPT_H__
+
+#include <xen/config.h>
+#include <xen/percpu.h>
+#include <xen/irq.h> /* in_irq() */
+#include <asm/system.h> /* local_irq_is_enabled() */
+
+/* Per-CPU nesting depth of atomic regions; defined in common/preempt.c. */
+DECLARE_PER_CPU(unsigned int, __preempt_count);
+
+#define preempt_count() (this_cpu(__preempt_count))
+
+/*
+ * Enter/leave an atomic (no-sleep) region.  Nesting is supported via the
+ * counter.  barrier() keeps the compiler from reordering critical-section
+ * accesses across the count update on either side.
+ */
+#define preempt_disable() do { \
+ preempt_count()++; \
+ barrier(); \
+} while (0)
+
+#define preempt_enable() do { \
+ barrier(); \
+ preempt_count()--; \
+} while (0)
+
+/*
+ * True when sleeping is disallowed: inside a preempt_disable() region,
+ * in interrupt context, or running with local interrupts disabled.
+ */
+#define in_atomic() (preempt_count() || in_irq() || !local_irq_is_enabled())
+
+#endif /* __XEN_PREEMPT_H__ */
#include <xen/spinlock.h>
#include <xen/percpu.h>
#include <xen/cpumask.h>
+#include <xen/preempt.h>
/**
* struct rcu_head - callback structure for use with RCU
*
* It is illegal to block while in an RCU read-side critical section.
*/
-#define rcu_read_lock(x) ((void)(x))
+/*
+ * Non-preemptible RCU: the read side pins this CPU in an atomic region,
+ * so grace periods can be inferred from every CPU passing a quiescent state.
+ */
+#define rcu_read_lock(x) ({ ((void)(x)); preempt_disable(); })
/**
* rcu_read_unlock - marks the end of an RCU read-side critical section.
*
* See rcu_read_lock() for more information.
*/
-#define rcu_read_unlock(x) ((void)(x))
+#define rcu_read_unlock(x) ({ ((void)(x)); preempt_enable(); })
/*
* So where is rcu_write_lock()? It does not exist, as there is no